tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
- return;
+ return 0;
xen_fault:
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
- asmlinkage void do_int3(struct xen_regs *regs, long error_code)
+ asmlinkage int do_int3(struct xen_regs *regs, long error_code)
{
- struct domain *d = current;
- struct trap_bounce *tb = &d->thread.trap_bounce;
+ struct exec_domain *ed = current;
+ struct trap_bounce *tb = &ed->thread.trap_bounce;
trap_info_t *ti;
DEBUGGER_trap_entry(TRAP_int3, regs, error_code);
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
+
+ return 0;
}
asmlinkage void do_double_fault(void)
unlikely((addr >> L2_PAGETABLE_SHIFT) ==
ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx) )
{
+ LOCK_BIGLOCK(d);
ptwr_flush(PTWR_PT_ACTIVE);
- return;
+ UNLOCK_BIGLOCK(d);
+ return EXCRET_fault_fixed;
}
if ( (addr < PAGE_OFFSET) &&
((error_code & 3) == 3) && /* write-protection fault */
ptwr_do_page_fault(addr) )
- return;
+ {
+ if ( unlikely(ed->mm.shadow_mode) )
+ (void)shadow_fault(addr, error_code);
+ return EXCRET_fault_fixed;
+ }
}
- if ( unlikely(d->mm.shadow_mode) &&
+ if ( unlikely(ed->mm.shadow_mode) &&
(addr < PAGE_OFFSET) && shadow_fault(addr, error_code) )
- return; /* Returns TRUE if fault was handled. */
+ return EXCRET_fault_fixed;
- if ( unlikely(addr >= LDT_VIRT_START) &&
- (addr < (LDT_VIRT_START + (d->mm.ldt_ents*LDT_ENTRY_SIZE))) )
+ if ( unlikely(addr >= LDT_VIRT_START(ed)) &&
+ (addr < (LDT_VIRT_START(ed) + (ed->mm.ldt_ents*LDT_ENTRY_SIZE))) )
{
/*
* Copy a mapping from the guest's LDT, if it is valid. Otherwise we
* send the fault up to the guest OS to be handled.
*/
- off = addr - LDT_VIRT_START;
- addr = d->mm.ldt_base + off;
- if ( likely(map_ldt_shadow_page(off >> PAGE_SHIFT)) )
+ LOCK_BIGLOCK(d);
+ off = addr - LDT_VIRT_START(ed);
+ addr = ed->mm.ldt_base + off;
+ ret = map_ldt_shadow_page(off >> PAGE_SHIFT);
+ UNLOCK_BIGLOCK(d);
+ if ( likely(ret) )
- return; /* successfully copied the mapping */
+ return EXCRET_fault_fixed; /* successfully copied the mapping */
}
if ( unlikely(!(regs->cs & 3)) )
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
- return;
+ return 0;
xen_fault:
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
perfc_incrc(copy_user_faults);
- if ( !d->mm.shadow_mode )
+ if ( !ed->mm.shadow_mode )
DPRINTK("Page fault: %08lx -> %08lx\n", regs->eip, fixup);
regs->eip = fixup;
- return;
+ return 0;
}
DEBUGGER_trap_fatal(TRAP_page_fault, regs, error_code);
"[error_code=%08x]\n"
"Faulting linear address might be %08lx\n",
smp_processor_id(), error_code, addr);
+ return 0;
}
- asmlinkage void do_general_protection(struct xen_regs *regs, long error_code)
+ asmlinkage int do_general_protection(struct xen_regs *regs, long error_code)
{
- struct domain *d = current;
- struct trap_bounce *tb = &d->thread.trap_bounce;
+ struct exec_domain *ed = current;
+ struct domain *d = ed->domain;
+ struct trap_bounce *tb = &ed->thread.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
- return;
+ return 0;
gp_in_kernel:
- return;
+ return 0;
if ( test_and_clear_bit(0, &nmi_softirq_reason) )
- send_guest_virq(dom0, VIRQ_PARITY_ERR);
+ send_guest_virq(dom0->exec_domain[0], VIRQ_PARITY_ERR);
if ( test_and_clear_bit(1, &nmi_softirq_reason) )
- send_guest_virq(dom0, VIRQ_IO_ERR);
+ send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
}
- asmlinkage void math_state_restore(struct xen_regs *regs, long error_code)
+ asmlinkage int math_state_restore(struct xen_regs *regs, long error_code)
{
/* Prevent recursion. */
clts();
tb->cs = current->thread.traps[7].cs;
tb->eip = current->thread.traps[7].address;
}
+
+ return EXCRET_fault_fixed;
}
- asmlinkage void do_debug(struct xen_regs *regs, long error_code)
+ asmlinkage int do_debug(struct xen_regs *regs, long error_code)
{
unsigned int condition;
- struct domain *d = current;
+ struct exec_domain *d = current;
struct trap_bounce *tb = &d->thread.trap_bounce;
DEBUGGER_trap_entry(TRAP_debug, regs, error_code);